bitkeeper revision 1.171 (3e9d272dy0ZnTPeYx-n2Qx8CXJLFHw)
author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 16 Apr 2003 09:49:33 +0000 (09:49 +0000)
committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Wed, 16 Apr 2003 09:49:33 +0000 (09:49 +0000)
sched.h, memory.c, traps.c:
  Allow paging out of current LDT pages. Also: flush the shadow LDT mappings on a pagetable switch.

xen/arch/i386/traps.c
xen/common/memory.c
xen/include/xeno/sched.h

index 5d49f588f8acb184b4f459d8ddef63489a4d04fa..3fbc6edcae4af41a7db0776bffb5374b1650f68f 100644 (file)
@@ -259,14 +259,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
 
     __asm__ __volatile__ ("movl %%cr2,%0" : "=r" (addr) : );
 
-    if ( unlikely(!(regs->xcs & 3)) )
-        goto fault_in_hypervisor;
-
     if ( unlikely(addr > PAGE_OFFSET) )
         goto fault_in_xen_space;
 
  bounce_fault:
 
+    if ( unlikely(!(regs->xcs & 3)) )
+        goto fault_in_hypervisor;
+
     ti = p->thread.traps + 14;
     gtb->flags = GTBF_TRAP_CR2; /* page fault pushes %cr2 */
     gtb->cr2        = addr;
@@ -275,7 +275,12 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
     gtb->eip        = ti->address;
     return; 
 
-
+    /*
+     * FAULT IN XEN ADDRESS SPACE:
+     *  We only deal with one kind -- a fault in the shadow LDT mapping.
+     *  If this occurs we pull a mapping from the guest's LDT, if it is
+     *  valid. Otherwise we send the fault up to the guest OS to be handled.
+     */
  fault_in_xen_space:
 
     if ( (addr < LDT_VIRT_START) || 
@@ -316,9 +321,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs, long error_code)
         page->flags |= PGT_ldt_page;
     }
 
+    /* Success! */
     get_page_type(page);
     get_page_tot(page);
     p->mm.perdomain_pt[l1_table_offset(off)+16] = mk_l1_pgentry(l1e|_PAGE_RW);
+    p->mm.shadow_ldt_mapcnt++;
 
     spin_unlock(&p->page_lock);
     return;
index 5684aada28e39303d7d628af05de38b531644ca4..637112d2418caecdb3fd31a9f995b01569fab892 100644 (file)
@@ -209,7 +209,9 @@ struct list_head free_list;
 spinlock_t free_list_lock = SPIN_LOCK_UNLOCKED;
 unsigned int free_pfns;
 
-static int tlb_flush[NR_CPUS];
+/* Used to defer flushing of memory structures. */
+static int flush_tlb[NR_CPUS] __cacheline_aligned;
+
 
 /*
  * init_frametable:
@@ -222,7 +224,7 @@ void __init init_frametable(unsigned long nr_pages)
     unsigned long page_index;
     unsigned long flags;
 
-    memset(tlb_flush, 0, sizeof(tlb_flush));
+    memset(flush_tlb, 0, sizeof(flush_tlb));
 
     max_page = nr_pages;
     frame_table_size = nr_pages * sizeof(struct pfn_info);
@@ -247,6 +249,34 @@ void __init init_frametable(unsigned long nr_pages)
 }
 
 
+static void __invalidate_shadow_ldt(void)
+{
+    int i;
+    unsigned long pfn;
+    struct pfn_info *page;
+    
+    current->mm.shadow_ldt_mapcnt = 0;
+
+    for ( i = 16; i < 32; i++ )
+    {
+        pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
+        if ( pfn == 0 ) continue;
+        current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
+        page = frame_table + pfn;
+        ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
+        ASSERT((page->flags & PG_domain_mask) == current->domain);
+        ASSERT((page->type_count != 0) && (page->tot_count != 0));
+        put_page_type(page);
+        put_page_tot(page);                
+    }
+}
+static inline void invalidate_shadow_ldt(void)
+{
+    if ( current->mm.shadow_ldt_mapcnt != 0 )
+        __invalidate_shadow_ldt();
+}
+
+
 /* Return original refcnt, or -1 on error. */
 static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
 {
@@ -283,6 +313,7 @@ static int inc_page_refcnt(unsigned long page_nr, unsigned int type)
     return get_page_type(page);
 }
 
+
 /* Return new refcnt, or -1 on error. */
 static int dec_page_refcnt(unsigned long page_nr, unsigned int type)
 {
@@ -373,6 +404,7 @@ static int get_l2_table(unsigned long page_nr)
     return ret;
 }
 
+
 static int get_l1_table(unsigned long page_nr)
 {
     l1_pgentry_t *p_l1_entry, l1_entry;
@@ -408,6 +440,7 @@ static int get_l1_table(unsigned long page_nr)
     return ret;
 }
 
+
 static int get_page(unsigned long page_nr, int writeable)
 {
     struct pfn_info *page;
@@ -450,6 +483,7 @@ static int get_page(unsigned long page_nr, int writeable)
     return(0);
 }
 
+
 static void put_l2_table(unsigned long page_nr)
 {
     l2_pgentry_t *p_l2_entry, l2_entry;
@@ -469,6 +503,7 @@ static void put_l2_table(unsigned long page_nr)
     unmap_domain_mem(p_l2_entry);
 }
 
+
 static void put_l1_table(unsigned long page_nr)
 {
     l1_pgentry_t *p_l1_entry, l1_entry;
@@ -492,6 +527,7 @@ static void put_l1_table(unsigned long page_nr)
     unmap_domain_mem(p_l1_entry-1);
 }
 
+
 static void put_page(unsigned long page_nr, int writeable)
 {
     struct pfn_info *page;
@@ -502,10 +538,19 @@ static void put_page(unsigned long page_nr, int writeable)
            ((page_type_count(page) != 0) && 
             ((page->flags & PG_type_mask) == PGT_writeable_page) &&
             ((page->flags & PG_need_flush) == PG_need_flush)));
-    if ( writeable && (put_page_type(page) == 0) )
+    if ( writeable )
+    {
+        if ( put_page_type(page) == 0 )
+        {
+            flush_tlb[smp_processor_id()] = 1;
+            page->flags &= ~PG_need_flush;
+        }
+    }
+    else if ( unlikely(((page->flags & PG_type_mask) == PGT_ldt_page) &&
+                       (page_type_count(page) != 0)) )
     {
-        tlb_flush[smp_processor_id()] = 1;
-        page->flags &= ~PG_need_flush;
+        /* We expect this is rare so we just blow the entire shadow LDT. */
+        invalidate_shadow_ldt();
     }
     put_page_tot(page);
 }
@@ -685,15 +730,17 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
         {
             put_l2_table(pagetable_val(current->mm.pagetable) >> PAGE_SHIFT);
             current->mm.pagetable = mk_pagetable(pfn << PAGE_SHIFT);
+            invalidate_shadow_ldt();
+            flush_tlb[smp_processor_id()] = 1;
         }
         else
         {
             MEM_LOG("Error while installing new baseptr %08lx %d", ptr, err);
         }
-        /* fall through */
+        break;
         
     case PGEXT_TLB_FLUSH:
-        tlb_flush[smp_processor_id()] = 1;
+        flush_tlb[smp_processor_id()] = 1;
         break;
     
     case PGEXT_INVLPG:
@@ -702,7 +749,6 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
 
     case PGEXT_SET_LDT:
     {
-        int i;
         unsigned long ents = val >> PGEXT_CMD_SHIFT;
         if ( ((ptr & (PAGE_SIZE-1)) != 0) || 
              (ents > 8192) ||
@@ -717,20 +763,8 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
         {
             if ( current->mm.ldt_ents != 0 )
             {
-                /* Tear down the old LDT. */
-                for ( i = 16; i < 32; i++ )
-                {
-                    pfn = l1_pgentry_to_pagenr(current->mm.perdomain_pt[i]);
-                    if ( pfn == 0 ) continue;
-                    current->mm.perdomain_pt[i] = mk_l1_pgentry(0);
-                    page = frame_table + pfn;
-                    ASSERT((page->flags & PG_type_mask) == PGT_ldt_page);
-                    ASSERT((page->flags & PG_domain_mask) == current->domain);
-                    ASSERT((page->type_count != 0) && (page->tot_count != 0));
-                    put_page_type(page);
-                    put_page_tot(page);                
-                }
-                tlb_flush[smp_processor_id()] = 1;
+                invalidate_shadow_ldt();
+                flush_tlb[smp_processor_id()] = 1;
             }
             current->mm.ldt_base = ptr;
             current->mm.ldt_ents = ents;
@@ -748,6 +782,7 @@ static int do_extended_command(unsigned long ptr, unsigned long val)
     return err;
 }
 
+
 int do_process_page_updates(page_update_request_t *ureqs, int count)
 {
     page_update_request_t req;
@@ -860,9 +895,9 @@ int do_process_page_updates(page_update_request_t *ureqs, int count)
         ureqs++;
     }
 
-    if ( tlb_flush[smp_processor_id()] )
+    if ( flush_tlb[smp_processor_id()] )
     {
-        tlb_flush[smp_processor_id()] = 0;
+        flush_tlb[smp_processor_id()] = 0;
         __write_cr3_counted(pagetable_val(current->mm.pagetable));
 
     }
index 8db8cdc1fb6d34c0d4d96faebbb9a4bc75d558b1..3071830a0494e5051820c29e92346618a71382c5 100644 (file)
@@ -30,7 +30,7 @@ struct mm_struct {
     l1_pgentry_t *perdomain_pt;
     pagetable_t  pagetable;
     /* Current LDT details. */
-    unsigned long ldt_base, ldt_ents;
+    unsigned long ldt_base, ldt_ents, shadow_ldt_mapcnt;
     /* Next entry is passed to LGDT on domain switch. */
     char gdt[6];
 };